Meta-analysis of avian responses and butterfly eyespots

Author

Mizuno et al.

Published

September 8, 2023

1 Setting up

Load packages and a custom function. The custom function is used to calculate the effect size (lnRR) and its variance from raw data.

Code
# Load (installing first if necessary) every package used in this document.
# NOTE: package order is kept as-is because library() order affects masking.
pacman::p_load(ape,          # read.nexus(), vcv()
               DT,           # datatable()
               ggokabeito,   # scale_colour_okabe_ito(), scale_fill_okabe_ito()
               ggcorrplot,   # ggcorrplot()
               here,         # here()
               knitr,        # kable()
               tidyverse,    # %>%, mutate(), map(), select(), ...
               patchwork,    # plot_layout()
               metafor,      # escalc(), rma.mv(), vcalc(), vif.rma()
               miWQS,        # pool.mi()
               orchaRd,      # orchard_plot(), bubble_plot(), i2_ml(), r2_ml()
               parallel,     # mclapply()
               uncoveR)      # assocMatrix() -- presumably; confirm

# Calculate the log response ratio (lnRR) and its sampling variance for every
# row of a raw-data table, and return the table with `lnRR` / `lnRR_var`
# columns appended.
#
# Expected columns in `dt`:
#   Tn, Cn                      -- treatment / control sample sizes
#   T_mean, C_mean, T_sd, C_sd  -- means and SDs (continuous responses)
#   T_proportion, C_proportion  -- proportions (proportion responses)
#   Response      -- "continuous", "proportion1" (no SDs) or "proportion2"
#   Study_design  -- "independent" or "dependent"
#   Direction     -- "Decrease" flips proportions to 1 - p so that all
#                    effects point in the same direction
effect_lnRR <- function(dt) {
  # Arcsine-square-root transformation for proportions; defined once here
  # instead of being re-defined inside every branch of the loop (behaviour
  # unchanged).
  asin_trans <- function(proportion) {
    asin(sqrt(proportion))
  }

  # Replace zeros in "C_mean", "T_sd", "C_sd", "C_proportion" with small
  # constants, and clamp extreme values of "T_proportion", so that the
  # log-ratios and variances computed below stay finite.
  dt <- dt %>%
    mutate(
      C_mean = ifelse(C_mean == 0, 0.04, C_mean),
      T_sd = ifelse(T_sd == 0, 0.01, T_sd),
      C_sd = ifelse(C_sd == 0, 0.05, C_sd),
      C_proportion = ifelse(C_proportion == 0, 0.01, C_proportion),
      T_proportion = ifelse(T_proportion < 0.01, 0.01, T_proportion),
      T_proportion = ifelse(T_proportion == 1, 0.9, T_proportion)
    )

  # Copy the dataset and add the effect-size columns to be filled in below.
  dt1 <- dt %>%
    mutate(
      lnRR = NA_real_,
      lnRR_var = NA_real_
    )

  for (i in seq_len(nrow(dt1))) {
    Tn <- dt1$Tn[i]
    Cn <- dt1$Cn[i]
    T_mean <- dt1$T_mean[i]
    C_mean <- dt1$C_mean[i]
    T_proportion <- dt1$T_proportion[i]
    C_proportion <- dt1$C_proportion[i]
    T_sd <- dt1$T_sd[i]
    C_sd <- dt1$C_sd[i]
    Response <- dt1$Response[i]
    Study_design <- dt1$Study_design[i]
    Direction <- dt1$Direction[i]

    if (Response == "continuous" && Study_design == "independent") {
      # Two independent groups: log ratio of means via metafor::escalc().
      # Fixed: `m2 = C_mean` relied on R's partial argument matching; the
      # argument is now spelled out in full as `m2i`.
      effect <- escalc(
        measure = "ROM",
        n1i = Tn, n2i = Cn,
        m1i = T_mean, m2i = C_mean,
        sd1i = T_sd, sd2i = C_sd,
        var.names = c("lnRR", "lnRR_var")
      )

      dt1$lnRR[i] <- effect$lnRR
      dt1$lnRR_var[i] <- effect$lnRR_var
    } else if (Response == "continuous" && Study_design == "dependent") {
      # Dependent design: log ratio of means for correlated groups ("ROMC"),
      # assuming a correlation of 0.5 between the two conditions.
      # Fixed: `m2 =` -> `m2i =` here as well.
      effect <- escalc(
        measure = "ROMC",
        ni = (Tn + Cn) / 2,
        m1i = T_mean, m2i = C_mean,
        sd1i = T_sd, sd2i = C_sd,
        ri = 0.5,
        var.names = c("lnRR", "lnRR_var")
      )

      dt1$lnRR[i] <- effect$lnRR
      dt1$lnRR_var[i] <- effect$lnRR_var
    } else if (Response %in% c("proportion1", "proportion2") &&
               Study_design %in% c("independent", "dependent")) {
      # Proportion data. For "Decrease" responses, flip the proportions so
      # that larger values always mean a stronger response. Direction is a
      # scalar inside the loop, so the original per-branch replace() calls
      # were equivalent to this single if().
      if (!is.na(Direction) && Direction == "Decrease") {
        T_proportion <- 1 - T_proportion
        C_proportion <- 1 - C_proportion
      }

      if (Response == "proportion1") {
        # No SDs reported: delta-method variance with the arcsine scaling
        # factor (1 / sqrt(8))^2 for both groups.
        var_scale_T <- (1 / sqrt(8))^2
        var_scale_C <- (1 / sqrt(8))^2
      } else {
        # SDs reported: convert each SD to the approximate variance on the
        # arcsine scale (delta method), using the untransformed proportions.
        # NOTE(review): these per-group terms are squared (as in the original
        # code, which squared T_SD / C_SD) -- confirm the double squaring of
        # what is already a variance-like quantity is intended.
        var_scale_T <- (T_sd^2 / (4 * T_proportion * (1 - T_proportion)))^2
        var_scale_C <- (C_sd^2 / (4 * C_proportion * (1 - C_proportion)))^2
      }

      T_trans <- asin_trans(T_proportion)
      C_trans <- asin_trans(C_proportion)

      # lnRR is the log ratio of the transformed proportions.
      es <- log(T_trans / C_trans)
      var_T <- var_scale_T * (1 / (T_trans^2 * Tn))
      var_C <- var_scale_C * (1 / (C_trans^2 * Cn))

      if (Study_design == "independent") {
        es_var <- var_T + var_C
      } else {
        # Dependent design: subtract a covariance term assuming r = 0.5
        # between the treatment and control measurements.
        es_var <- var_T + var_C - 2 * 0.5 * sqrt(var_T) * sqrt(var_C)
      }

      dt1$lnRR[i] <- es
      dt1$lnRR_var[i] <- es_var
    }
  }

  return(dt1)
}

2 Prepare datasets for analysis

First, we used the predator dataset to check whether we need to consider phylogeny. If so, we will use the predator and prey datasets for the following meta-analyses.

Code
# Read the raw datasets. The CSV files are Shift-JIS ("CP932") encoded.
# header = TRUE is spelled out (the shorthand `T` is a reassignable name).
dat_predator <- read.csv(here("data/predator_22072023.csv"),
                         header = TRUE, fileEncoding = "CP932")

dat_all <- read.csv(here("data/all_15082023.csv"),
                    header = TRUE, fileEncoding = "CP932")


# Interactive table of the predator dataset (10 rows per page).
datatable(dat_predator, caption = "Predator datasets", options = list(pageLength = 10, scrollX = TRUE))

Table 1 - Predator datasets

Code
# Read the sample of bird phylogenies (NEXUS format; from BirdTree.org, see
# the notes later in this document) and draw the first tree.
trees <- read.nexus(here("data/bird_phy.nex"))
plot(trees[1])

Figure 1— Phylogenetic tree of bird species
Code
# Compute effect sizes for both datasets with the custom function above.
dat_pred <- effect_lnRR(dat_predator)
dat <- effect_lnRR(dat_all)

# add observation id (one unique id per effect size / row)
dat_pred$Obs_ID <- 1:nrow(dat_pred)
dat$Obs_ID <- 1:nrow(dat)

# diameter, area, and background are log-transformed because they are skewed
dat$Log_diameter <- log(dat$Diameter_pattern)
dat$Log_area <- log(dat$Area_pattern)
dat$Log_background <- log(dat$Area_background)


# use vcalc to calculate the variance-covariance matrix of sampling errors
# NOTE(review): VCV_pred uses `subgroup = Obs_ID` while VCV below uses
# `obs = Obs_ID` with `rho = 0.5` -- these produce different covariance
# structures; confirm the difference between the two matrices is intentional.
VCV_pred <- vcalc(vi = lnRR_var, 
                  cluster = Cohort_ID, 
                  subgroup = Obs_ID, 
                  data = dat_pred)

VCV <- vcalc(vi = lnRR_var, 
             cluster = Cohort_ID,
             obs = Obs_ID,
             rho = 0.5,
             data = dat)

I cannot attach the caption to a datatable()-formatted table. Should I use a kable()-formatted table instead?

Code
# Interactive table of the combined dataset used for the meta-analysis.
datatable(dat, caption = "Dataset for meta-analysis", 
          options = list(pageLength = 10, scrollX = TRUE))

Table 2 - Dataset for meta-analysis

If phylogeny does not explain much of the heterogeneity, we will not need to consider it and the two datasets can be merged.

We used meta-analytical models to calculate total I2 (a measure of heterogeneity not caused by sampling error; Higgins. et al., 2003) and the partial I2 explained by each random factor.

Based on this, we do not need to include phylogenetic relatedness in our meta-regressions, as this random factor did not explain much of the heterogeneity between effect sizes (partial I2 < 0.001%); we can therefore combine the two datasets (predator and prey) for the meta-analysis.

Code
# Fit the multilevel meta-analytic model for the predator dataset, with a
# phylogenetic correlation matrix supplied for the Phylogeny random effect.
# Relies on the globals VCV_pred and dat_pred defined above.
# NOTE(review): the default `cor_tree = vcv_tree` refers to a global object
# that is never defined in this document (only `vcv_tree_50` is), so the
# default is unusable -- confirm it can be dropped.
phy_model <- function(cor_tree = vcv_tree){
            model <- rma.mv(yi = lnRR,
                            V = VCV_pred,
                            random = list(~1 | Study_ID,
                                          ~1 | Shared_control_ID,
                                          ~1 | Cohort_ID,
                                          ~1 | Obs_ID,
                                          ~1 | Bird_species,
                                          ~1 | Phylogeny),
                            R = list(Phylogeny = cor_tree),
                            test = "t",
                            method = "REML",
                            sparse = TRUE,
                            data = dat_pred)
  model
}

# Use the first 50 trees and convert each one to a correlation matrix.
tree_50 <- trees[1:50]
vcv_tree_50 <- map(tree_50, ~vcv(.x, corr = TRUE))

# Run multiple meta-analyses with 50 different trees and obtain the combined result

ma_50 <- mclapply(vcv_tree_50, phy_model, mc.cores = 8) 
Code
# Reload the 50 fitted models saved earlier.
ma_50 <- readRDS(here("Rdata", "ma_50.RDS"))

# Extract the pooled mean and its standard error from every fitted model.
est_50 <- vapply(ma_50, function(m) m$b[[1]], numeric(1))
se_50 <- vapply(ma_50, function(m) m$se, numeric(1))

# Interleave as (estimate, se) pairs and reshape into the 1 x 2 x 50 array
# layout that pool.mi() requires.
df_50 <- as.vector(rbind(est_50, se_50))
my_array <- array(df_50, dim = c(1, 2, 50))

com_res <- round(pool.mi(my_array), 4)
Code
# Render the result pooled across the 50 trees (from pool.mi above) as a table.
knitr::kable(com_res, caption = "Combined result of 50 phylogenetic trees")
Combined result of 50 phylogenetic trees
pooled.mean pooled.total.se se.within se.between relative.inc.var frac.miss.info CI.1 CI.2 p.value
0.1394 0.1192 0.1192 0 0 0 -0.0943 0.3731 0.2424
Code
# Average each variance component (sigma^2) across the 50 tree-specific fits.
sigma2_by_tree <- lapply(ma_50, function(fit) fit$sigma2)
sigma2_tab <- data.frame(do.call(rbind, sigma2_by_tree))

# One column per random effect, in the order they entered the model.
colnames(sigma2_tab) <- c("sigma^2.1_Study_ID", "sigma^2.2_SharedControl_ID", 
                          "sigma^2.3_Cohort_ID", "sigma^2.4_Obs_ID", 
                          "sigma^2.5_BirdSpecies", "sigma^2.6_Phylogeny")

sigma2_mod <- round(colMeans(sigma2_tab), 4)

knitr::kable(sigma2_mod, caption = "Average variance components")
Average variance components
x
sigma^2.1_Study_ID 0.0000
sigma^2.2_SharedControl_ID 0.0923
sigma^2.3_Cohort_ID 0.1009
sigma^2.4_Obs_ID 0.5323
sigma^2.5_BirdSpecies 0.0000
sigma^2.6_Phylogeny 0.0000
We got 1000 phylogenetic trees from BirdTree.org

Only 50 trees are used as in Nakagawa & Villemereuil (2019)

3 Meta-analysis

We used meta-analytical models to calculate total I2 (a measure of heterogeneity not caused by sampling error; Higgins. et al., 2003) and the partial I2 explained by each random factor.

Which random effects should be removed from our models?

We can remove Shared control ID and Cohort ID

Code
# Overall multilevel meta-analysis on the combined dataset. Bird_species and
# Phylogeny are no longer included (their variance components above were ~0).
# NOTE(review): the original comment here said Cohort_ID was excluded because
# sigma^2 = 0 and I2 = 0, but Cohort_ID is still in the random-effects list
# below -- confirm which was intended.
ma_all <- rma.mv(yi = lnRR,
                  V = VCV, 
                  random = list(~1 | Study_ID,
                                ~1 | Shared_control_ID,
                                ~1 | Cohort_ID,
                                ~1 | Obs_ID),
                  test = "t",
                  method = "REML", 
                  sparse = TRUE,
                  data = dat)

summary(ma_all)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-248.2436   496.4871   506.4871   524.3289   506.7215   

Variance Components:

            estim    sqrt  nlvls  fixed             factor 
sigma^2.1  0.0490  0.2214     32     no           Study_ID 
sigma^2.2  0.0000  0.0000     88     no  Shared_control_ID 
sigma^2.3  0.0000  0.0000    157     no          Cohort_ID 
sigma^2.4  0.2423  0.4923    263     no             Obs_ID 

Test for Heterogeneity:
Q(df = 262) = 6322.6734, p-val < .0001

Model Results:

estimate      se    tval   df    pval   ci.lb   ci.ub      
  0.2086  0.0597  3.4913  262  0.0006  0.0909  0.3262  *** 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Total/partial I2 (heterogeneity) and marginal/conditional R2 for ma_all.
i2 <- round(i2_ml(ma_all), 4)
r2 <- round(r2_ml(ma_all), 4)
I2_Total I2_Study_ID I2_Shared_control_ID I2_Cohort_ID I2_Obs_ID
99.8442 16.7983 0 0 83.0458
R2_marginal R2_conditional
0 0.1682

Table 3 - Heterogeneity of effect size

Code
# Orchard plot of the overall effect size (Okabe-Ito colour-blind-safe palette).
orchard_plot(ma_all,
              group = "Study_ID",
              xlab = "log response ratio (lnRR)", 
              angle = 45) + 
              scale_x_discrete(labels = c("Overall effect")) + 
              scale_colour_okabe_ito()+
              scale_fill_okabe_ito()

Figure 2— Estimates of overall effect size and 95% confidence intervals

4 Meta-regressions: uni-moderator

Eyespot vs. conspicuous patterns — Is there a significant difference in effect size between the two pattern types?

Code
# Model with intercept: the "eyespot" coefficient is the contrast against
# the "conspicuous" reference level (see the intercept-removed model below
# for per-level estimates).
mr_eyespot <- rma.mv(yi = lnRR,
                    V = VCV, 
                    mods = ~ Treatment_stimulus,
                    random = list(~1 | Study_ID,
                                  ~1 | Obs_ID),
                    test = "t",
                    method = "REML", 
                    sparse = TRUE,
                    data = dat)

summary(mr_eyespot)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.4652   494.9305   502.9305   517.1886   503.0867   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0534  0.2312     32     no  Study_ID 
sigma^2.2  0.2426  0.4926    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 6290.4224, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 0.1628, p-val = 0.6869

Model Results:

                           estimate      se    tval   df    pval    ci.lb 
intrcpt                      0.1783  0.0980  1.8199  261  0.0699  -0.0146 
Treatment_stimuluseyespot    0.0442  0.1096  0.4035  261  0.6869  -0.1716 
                            ci.ub    
intrcpt                    0.3712  . 
Treatment_stimuluseyespot  0.2601    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal / conditional R2 for the eyespot model.
r2_1 <- round(r2_ml(mr_eyespot), 4)
R2_marginal R2_conditional
0.0014 0.1816

Table XX. Model goodness-of-fit

Code
# Orchard plot comparing the eyespot and conspicuous-pattern estimates.
orchard_plot(mr_eyespot,
            mod = "Treatment_stimulus",
            group = "Study_ID",
            xlab = "log response ratio (lnRR)",
            angle = 45) + 
            scale_colour_okabe_ito(order = 2:3)+
            scale_fill_okabe_ito(order = 2:3)

Figure 3— Eyespot vs. conspicuous patterns
Code
# Intercept-removed model (`-1`): each Treatment_stimulus level gets its own
# mean estimate, tested against zero, instead of a contrast.
mr_eyespot1 <- rma.mv(yi = lnRR,
                      V = VCV, 
                      mods = ~ Treatment_stimulus -1,
                      random = list(~1 | Study_ID,
                                    ~1 | Obs_ID),
                      test = "t",
                      method = "REML", 
                      sparse = TRUE,
                      data = dat)

summary(mr_eyespot1)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.4652   494.9305   502.9305   517.1886   503.0867   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0534  0.2312     32     no  Study_ID 
sigma^2.2  0.2426  0.4926    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 6290.4224, p-val < .0001

Test of Moderators (coefficients 1:2):
F(df1 = 2, df2 = 261) = 5.9203, p-val = 0.0031

Model Results:

                               estimate      se    tval   df    pval    ci.lb 
Treatment_stimulusconspicuous    0.1783  0.0980  1.8199  261  0.0699  -0.0146 
Treatment_stimuluseyespot        0.2225  0.0696  3.1974  261  0.0016   0.0855 
                                ci.ub     
Treatment_stimulusconspicuous  0.3712   . 
Treatment_stimuluseyespot      0.3596  ** 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Uni-moderator meta-regression: log-transformed pattern diameter.
mr_diameter <- rma.mv(yi = lnRR,
                      V = VCV, 
                      mods = ~ Log_diameter,
                      random = list(~1 | Study_ID,
                                    ~1 | Obs_ID),
                      test = "t",
                      method = "REML", 
                      sparse = TRUE,
                      data = dat)

summary(mr_diameter)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-244.4086   488.8173   496.8173   511.0753   496.9735   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0440  0.2099     32     no  Study_ID 
sigma^2.2  0.2372  0.4870    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 6288.3226, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 6.0358, p-val = 0.0147

Model Results:

              estimate      se     tval   df    pval    ci.lb   ci.ub    
intrcpt        -0.1679  0.1634  -1.0274  261  0.3052  -0.4897  0.1539    
Log_diameter    0.1945  0.0792   2.4568  261  0.0147   0.0386  0.3504  * 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal / conditional R2 for the diameter model.
r2_2 <- round(r2_ml(mr_diameter), 4)
R2_marginal R2_conditional
0.0668 0.213

Table XX. Model goodness-of-fit

Code
# Bubble plot of effect sizes against log-transformed diameter.
bubble_plot(mr_diameter,
            mod = "Log_diameter",
            group = "Study_ID",
            xlab = "Log-transformed diameter")

Code
# Uni-moderator meta-regression: log-transformed pattern area.
mr_area <- rma.mv(yi = lnRR,
                  V = VCV, 
                  mods = ~ Log_area,
                  random = list(~1 | Study_ID,
                                ~1 | Obs_ID),
                  test = "t",
                  method = "REML", 
                  sparse = TRUE,
                  data = dat)

summary(mr_area)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-243.8857   487.7714   495.7714   510.0295   495.9277   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0483  0.2199     32     no  Study_ID 
sigma^2.2  0.2351  0.4849    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 6283.7400, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 6.9863, p-val = 0.0087

Model Results:

          estimate      se     tval   df    pval    ci.lb   ci.ub     
intrcpt    -0.1849  0.1601  -1.1554  261  0.2490  -0.5001  0.1302     
Log_area    0.1106  0.0419   2.6432  261  0.0087   0.0282  0.1931  ** 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal / conditional R2 for the area model.
r2_3 <- round(r2_ml(mr_area), 4)
R2_marginal R2_conditional
0.0825 0.239

Table XX. Model goodness-of-fit

Code
# Bubble plot of effect sizes against log-transformed area.
bubble_plot(mr_area,
            mod = "Log_area",
            group = "Study_ID",
            xlab = "Log-transformed area")

Code
# Uni-moderator meta-regression: number of patterns.
# NOTE(review): unlike the other uni-moderator models, this one also keeps
# Shared_control_ID as a random effect -- confirm this is intentional.
mr_num <- rma.mv(yi = lnRR,
                  V = VCV,
                  mods = ~ Number_pattern,
                  random = list(~1 | Study_ID,
                                ~1 | Shared_control_ID,
                                ~1 | Obs_ID),
                      test = "t",
                      method = "REML",
                      sparse = TRUE,
                      data = dat)

summary(mr_num)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-245.6558   491.3117   501.3117   519.1343   501.5470   

Variance Components:

            estim    sqrt  nlvls  fixed             factor 
sigma^2.1  0.0468  0.2164     32     no           Study_ID 
sigma^2.2  0.0000  0.0000     88     no  Shared_control_ID 
sigma^2.3  0.2386  0.4885    263     no             Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 6307.0850, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 4.4504, p-val = 0.0358

Model Results:

                estimate      se     tval   df    pval    ci.lb    ci.ub      
intrcpt           0.3453  0.0877   3.9376  261  0.0001   0.1726   0.5180  *** 
Number_pattern   -0.0579  0.0274  -2.1096  261  0.0358  -0.1119  -0.0039    * 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal / conditional R2 for the number-of-patterns model.
r2_4 <- round(r2_ml(mr_num), 4)
R2_marginal R2_conditional
0.0196 0.1805

Table XX. Model goodness-of-fit

Code
# Bubble plot of effect sizes against the number of patterns.
bubble_plot(mr_num,
            mod = "Number_pattern",
            group = "Study_ID",
            xlab = "Number of patterns") 

Our dataset includes two types of stimuli: live and artificial. Is there a significant difference in effect size between the two stimulus types?

Code
# Model with intercept: the "real" coefficient is the contrast against the
# "artificial" reference level (intercept-removed version below).
mr_prey_type <- rma.mv(yi = lnRR,
                  V = VCV, 
                  mods = ~ Type_prey,
                  random = list(~1 | Study_ID,
                                ~1 | Obs_ID),
                  test = "t",
                  method = "REML", 
                  sparse = TRUE,
                  data = dat)

summary(mr_prey_type)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.2961   494.5922   502.5922   516.8502   502.7484   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0566  0.2380     32     no  Study_ID 
sigma^2.2  0.2415  0.4914    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 6259.1358, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 0.3325, p-val = 0.5647

Model Results:

               estimate      se    tval   df    pval    ci.lb   ci.ub    
intrcpt          0.1845  0.0759  2.4304  261  0.0158   0.0350  0.3339  * 
Type_preyreal    0.0763  0.1324  0.5767  261  0.5647  -0.1843  0.3370    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal / conditional R2 for the prey-type model.
r2_5 <- round(r2_ml(mr_prey_type), 4)
R2_marginal R2_conditional
0.0034 0.1927

Table XX. Model goodness-of-fit

Code
# Orchard plot comparing real vs. artificial prey.
orchard_plot(mr_prey_type,
              mod = "Type_prey",
              group = "Study_ID",
              xlab = "Type of prey",
              angle = 45) +
            scale_colour_okabe_ito(order = 4:5)+
            scale_fill_okabe_ito(order = 4:5)

Code
# Intercept-removed model (`-1`): each prey type gets its own mean estimate,
# tested against zero.
mr_prey_type1 <- rma.mv(yi = lnRR,
                        V = VCV, 
                        mods = ~ Type_prey -1,
                        random = list(~1 | Study_ID,
                                      ~1 | Obs_ID),
                        test = "t",
                        method = "REML", 
                        sparse = TRUE,
                        data = dat)

summary(mr_prey_type1)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.2961   494.5922   502.5922   516.8502   502.7484   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0566  0.2380     32     no  Study_ID 
sigma^2.2  0.2415  0.4914    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 6259.1358, p-val < .0001

Test of Moderators (coefficients 1:2):
F(df1 = 2, df2 = 261) = 5.8435, p-val = 0.0033

Model Results:

                     estimate      se    tval   df    pval   ci.lb   ci.ub    
Type_preyartificial    0.1845  0.0759  2.4304  261  0.0158  0.0350  0.3339  * 
Type_preyreal          0.2608  0.1085  2.4042  261  0.0169  0.0472  0.4744  * 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Our dataset includes four prey shapes: real butterfly, abstract butterfly, abstract stimulus, and caterpillar. Is there a significant difference in effect size among these shapes?

Code
# Meta-regression on prey shape (four levels), intercept removed so that each
# shape gets its own mean estimate. Note: despite the deep indentation, the
# test/method/sparse/data lines below are still arguments of rma.mv().
mr_prey_shape <- rma.mv(yi = lnRR,
                        V = VCV, 
                        mods = ~ Shape_prey - 1,
                        random = list(~1 | Study_ID,
                                      ~1 | Shared_control_ID,
                                      ~1 | Obs_ID),
                                      test = "t",
                                      method = "REML", 
                                      sparse = TRUE,
                                      data = dat)

summary(mr_prey_shape)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-243.5524   487.1049   501.1049   526.0027   501.5511   

Variance Components:

            estim    sqrt  nlvls  fixed             factor 
sigma^2.1  0.0560  0.2366     32     no           Study_ID 
sigma^2.2  0.0000  0.0000     88     no  Shared_control_ID 
sigma^2.3  0.2392  0.4890    263     no             Obs_ID 

Test for Residual Heterogeneity:
QE(df = 259) = 6062.6887, p-val < .0001

Test of Moderators (coefficients 1:4):
F(df1 = 4, df2 = 259) = 3.7576, p-val = 0.0054

Model Results:

                              estimate      se    tval   df    pval    ci.lb 
Shape_preyabstract_butterfly    0.3222  0.1078  2.9889  259  0.0031   0.1099 
Shape_preyabstract_stimuli      0.0208  0.1868  0.1112  259  0.9115  -0.3470 
Shape_preybutterfly             0.2604  0.1080  2.4120  259  0.0166   0.0478 
Shape_preycaterpillar           0.0663  0.1283  0.5164  259  0.6060  -0.1864 
                               ci.ub     
Shape_preyabstract_butterfly  0.5345  ** 
Shape_preyabstract_stimuli    0.3886     
Shape_preybutterfly           0.4731   * 
Shape_preycaterpillar         0.3190     

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal / conditional R2 for the prey-shape model.
r2_6 <- round(r2_ml(mr_prey_shape), 4)
R2_marginal R2_conditional
0.0527 0.2325

Table XX. Model goodness-of-fit

Code
# Orchard plot of the four prey-shape estimates.
orchard_plot(mr_prey_shape,
              mod = "Shape_prey",
              group = "Study_ID",
              xlab = "Shape of prey",
              angle = 45) +
              scale_colour_okabe_ito(order = 6:9)+
              scale_fill_okabe_ito(order = 6:9)

5 Correlation visualisation and choose moderators

Before we run multi-moderator meta-regressions, we need to consider the correlation between moderators. Area, diameter and background seem to be correlated. We visualised the correlation between these variables.

Code
# Correlation matrices between the continuous moderators, on the raw and
# log-transformed scales, shown side by side with patchwork.
corr_cont <- round(cor(dat[, c("Diameter_pattern", "Area_pattern", "Number_pattern", "Area_background")]), 2)

p1 <- ggcorrplot(corr_cont, hc.order = TRUE, lab = TRUE, outline.col = "white",
                 colors = c("#6D9EC1", "white", "#E46726"),
                 title = "(a) Continuous variables")

corr_cont_log <- round(cor(dat[, c("Log_diameter", "Log_area", "Number_pattern", "Log_background")]), 2)

# Fixed typo in the displayed plot title: "transormed" -> "transformed".
p2 <- ggcorrplot(corr_cont_log, hc.order = TRUE, lab = TRUE, outline.col = "white",
                 colors = c("#6D9EC1", "white", "#E46726"),
                 title = "(b) Log-transformed continuous variables")

p1 + p2 + plot_layout(guides = 'collect')

Figure 4— Correlation between continuous moderators
Code
# Pairwise associations between the categorical moderators.
# (assocMatrix presumably comes from the uncoveR package loaded above --
# confirm.)
dat1 <- dat %>%
  select(Treatment_stimulus, Type_prey, Shape_prey)

assocMatrix(dat1)

Figure 5— Correlation between categorical variables

ASK: Why is the y-axis label shown as “NA”?

We used model R2 values to find the better model and moderator VIF values to check multicollinearity between moderators. A higher R2 indicates a better model, and VIF > 2 indicates multicollinearity.

5.0.1 R2

Code
# Candidate multi-moderator models: area + number vs. diameter + number.
# The moderator set with the higher marginal R2 is kept for later models.
r2_area <- rma.mv(yi = lnRR,
                  V = VCV,
                  mods = ~ Log_area + Number_pattern,
                  random = list(~1 | Study_ID,
                                ~1 | Obs_ID),
                  test = "t",
                  method = "REML",
                  sparse = TRUE,
                  data = dat)

r2_diameter <- rma.mv(yi = lnRR,
                      V = VCV,
                      mods = ~ Log_diameter + Number_pattern,
                      random = list(~1 | Study_ID,
                                    ~1 | Obs_ID),
                      test = "t",
                      method = "REML",
                      sparse = TRUE,
                      data = dat)

# check r2 values
r2_ml(r2_area)
   R2_marginal R2_conditional 
    0.07652345     0.22726877 
Code
# R2 of the diameter-based model, for comparison with the area-based model.
r2_ml(r2_diameter)
   R2_marginal R2_conditional 
    0.06535709     0.20793579 

It seems area is a better predictor than diameter.

5.0.2 VIF

Code
# Full model with all candidate moderators, fitted only to check
# multicollinearity via variance inflation factors (VIF > 2 is treated as
# evidence of collinearity in the text above).
vif_1 <- rma.mv(yi = lnRR,
                V = VCV,
                mods = ~ Log_area + Number_pattern + 
                        Treatment_stimulus + Type_prey + Shape_prey,
                random = list(~1 | Study_ID,
                              ~1 | Obs_ID), 
                test = "t",
                method = "REML",
                sparse = TRUE,
                data = dat)

# we get Warning message:
# Redundant predictors dropped from the model. 

vif.rma(vif_1)

Log_area  Number_pattern  Treatment_stimuluseyespot  Type_preyreal  Shape_preyabstract_stimuli  Shape_preycaterpillar 
  2.9558          1.2707                     1.4984         1.8794                      3.1362                 1.4621 

Shape_prey has high VIF value. We removed it from the model and run new model.

Code
# Refit without Shape_prey (which had a high VIF above) and re-check the VIFs.
vif_2 <- rma.mv(yi = lnRR,
                V = VCV,
                mods = ~ Log_area + Number_pattern + 
                        Treatment_stimulus + Type_prey,
                random = list(~1 | Study_ID,
                              ~1 | Obs_ID), 
                test = "t",
                method = "REML",
                sparse = TRUE,
                data = dat)

vif.rma(vif_2)

Log_area  Number_pattern  Treatment_stimuluseyespot  Type_preyreal 
  1.1194          1.1929                     1.0945         1.1986 

It looks good. We will use this model.

6 Meta-regressions: multi-moderator

Include all moderators in the model.

Code
# Multi-moderator meta-regression with all retained moderators
# (Shape_prey was dropped above because of its high VIF).
mr_all <- rma.mv(yi = lnRR,
                V = VCV, 
                mods = ~ Treatment_stimulus + Log_area 
                + Number_pattern + Type_prey,
                random = list(~1 | Study_ID,
                              ~1 | Obs_ID),
                test = "t",
                method = "REML", 
                sparse = TRUE,
                data = dat)

summary(mr_all)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-240.1811   480.3622   494.3622   519.2329   494.8102   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0546  0.2337     32     no  Study_ID 
sigma^2.2  0.2332  0.4829    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 258) = 6150.9600, p-val < .0001

Test of Moderators (coefficients 2:5):
F(df1 = 4, df2 = 258) = 2.7004, p-val = 0.0312

Model Results:

                           estimate      se     tval   df    pval    ci.lb 
intrcpt                     -0.0899  0.2139  -0.4202  258  0.6747  -0.5110 
Treatment_stimuluseyespot    0.0115  0.1137   0.1014  258  0.9193  -0.2124 
Log_area                     0.0982  0.0454   2.1637  258  0.0314   0.0088 
Number_pattern              -0.0503  0.0299  -1.6798  258  0.0942  -0.1092 
Type_preyreal                0.1900  0.1426   1.3320  258  0.1840  -0.0909 
                            ci.ub    
intrcpt                    0.3313    
Treatment_stimuluseyespot  0.2355    
Log_area                   0.1876  * 
Number_pattern             0.0087  . 
Type_preyreal              0.4709    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal / conditional R2 for the full multi-moderator model.
r2_ml(mr_all)
   R2_marginal R2_conditional 
    0.08026645     0.25480604 

Only include moderators related to conspicuousness (pattern area and number of patterns) in the model.

Code
# Model with only the conspicuousness moderators (pattern area and number of
# patterns).
mr_cons <- rma.mv(yi = lnRR,
                V = VCV, 
                mods = ~ Log_area + Number_pattern,
                random = list(~1 | Study_ID,
                              ~1 | Obs_ID),
                test = "t",
                method = "REML", 
                sparse = TRUE,
                data = dat)

summary(mr_cons)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-242.5573   485.1145   495.1145   512.9179   495.3507   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0458  0.2141     32     no  Study_ID 
sigma^2.2  0.2349  0.4847    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 260) = 6250.3462, p-val < .0001

Test of Moderators (coefficients 2:3):
F(df1 = 2, df2 = 260) = 4.4568, p-val = 0.0125

Model Results:

                estimate      se     tval   df    pval    ci.lb   ci.ub    
intrcpt          -0.0234  0.1958  -0.1194  260  0.9050  -0.4089  0.3621    
Log_area          0.0913  0.0435   2.0993  260  0.0368   0.0057  0.1769  * 
Number_pattern   -0.0394  0.0286  -1.3754  260  0.1702  -0.0957  0.0170    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal and conditional R^2 for the conspicuousness-only model (orchaRd)
r2_ml(mr_cons)
   R2_marginal R2_conditional 
    0.07652345     0.22726877 
Code
# Bubble plot of the pattern-area slope, grouped by study.
bubble_plot(
  mr_cons,
  mod = "Log_area",
  group = "Study_ID",
  xlab = "Log-transformed area"
)

Code
# Conspicuousness model plus stimulus type, to check whether the
# eyespot/non-eyespot contrast adds anything beyond area and number.
mr_cons_1 <- rma.mv(
  yi = lnRR,
  V = VCV,
  mods = ~ Log_area + Number_pattern + Treatment_stimulus,
  random = list(~ 1 | Study_ID, ~ 1 | Obs_ID),
  data = dat,
  method = "REML",
  test = "t",
  sparse = TRUE
)

summary(mr_cons_1)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-241.7781   483.5563   495.5563   516.8973   495.8896   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0505  0.2247     32     no  Study_ID 
sigma^2.2  0.2350  0.4848    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 259) = 6249.3520, p-val < .0001

Test of Moderators (coefficients 2:4):
F(df1 = 3, df2 = 259) = 3.0157, p-val = 0.0305

Model Results:

                           estimate      se     tval   df    pval    ci.lb 
intrcpt                     -0.0581  0.2097  -0.2769  259  0.7821  -0.4710 
Log_area                     0.0919  0.0445   2.0647  259  0.0399   0.0043 
Number_pattern              -0.0404  0.0289  -1.3965  259  0.1638  -0.0973 
Treatment_stimuluseyespot    0.0522  0.1080   0.4830  259  0.6295  -0.1605 
                            ci.ub    
intrcpt                    0.3549    
Log_area                   0.1795  * 
Number_pattern             0.0166    
Treatment_stimuluseyespot  0.2648    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal and conditional R^2 for the extended conspicuousness model (orchaRd)
r2_ml(mr_cons_1)
   R2_marginal R2_conditional 
    0.08056427     0.24319979 
Code
# Meta-regression with stimulus type and prey type (real vs artificial)
# as moderators.
mr_pre <- rma.mv(
  yi = lnRR,
  V = VCV,
  mods = ~ Treatment_stimulus + Type_prey,
  random = list(~ 1 | Study_ID, ~ 1 | Obs_ID),
  data = dat,
  method = "REML",
  test = "t",
  sparse = TRUE
)

summary(mr_pre)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-246.5407   493.0815   503.0815   520.8849   503.3177   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0603  0.2456     32     no  Study_ID 
sigma^2.2  0.2421  0.4921    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 260) = 6227.4232, p-val < .0001

Test of Moderators (coefficients 2:3):
F(df1 = 2, df2 = 260) = 0.2099, p-val = 0.8108

Model Results:

                           estimate      se    tval   df    pval    ci.lb 
intrcpt                      0.1654  0.1034  1.6003  260  0.1107  -0.0381 
Treatment_stimuluseyespot    0.0311  0.1166  0.2669  260  0.7898  -0.1985 
Type_preyreal                0.0688  0.1404  0.4899  260  0.6246  -0.2077 
                            ci.ub    
intrcpt                    0.3690    
Treatment_stimuluseyespot  0.2607    
Type_preyreal              0.3452    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal and conditional R^2 for the stimulus/prey-type model (orchaRd)
r2_ml(mr_pre)
   R2_marginal R2_conditional 
   0.004150357    0.202792083 

7 Publication bias

ASK: Is this correct? Also, how do I number sub-captions correctly?

Code
# Funnel plot on the standard-error scale (yaxis = "sei").
# FIX: the original ylab said "Precision (inverse of SE)", which is the
# label for the "seinv" axis, not "sei"; also corrected the mixed
# "Standarised"/"Standarized" typos in the x-axis labels.
funnel(ma_all, yaxis = "sei",
      xlab = "Standardised residuals",
      ylab = "Standard error")

# Funnel plot on the inverse-standard-error (precision) scale.
f2 <- funnel(ma_all, yaxis = "seinv",
      xlab = "Standardised residuals",
      ylab = "Precision (inverse of SE)",  col = c(alpha("orange", 0.5)))

# Same precision-scale plot with outliers cut (points with precision of
# 80 or more omitted via ylim). I think this is better…
f3 <- funnel(ma_all, yaxis = "seinv",
      xlab = "Standardised residuals",
      ylab = "Precision (inverse of SE)",
      xlim = c(-4, 4),
      ylim = c(0.8, 80),
      col = c(alpha("mediumvioletred", 0.5)))

Figure 6— Effect size and its standard error

Figure 7— Effect size and its inverse standard error

Figure 8— Effect size and its inverse standard error (outliers removed)

Figure 9— Funnel plot of effect size and its standard error

Code
# Egger-type test for small-study effects: moderator is the square root
# of the inverse "effective" sample size.
df_bias <- mutate(dat, sqrt_inv_e_n = sqrt((Cn + Tn) / (Cn * Tn)))

# NOTE(review): this model uses the point-wise sampling variances
# (lnRR_var) as V rather than the VCV matrix used elsewhere —
# presumably intentional for the publication-bias test; confirm.
bias_model <- rma.mv(
  yi = lnRR,
  V = lnRR_var,
  mods = ~ 1 + sqrt_inv_e_n,
  random = list(~ 1 | Study_ID, ~ 1 | Obs_ID),
  data = df_bias,
  method = "REML",
  test = "t",
  sparse = TRUE
)

summary(bias_model)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-245.8643   491.7285   499.7285   513.9866   499.8848   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0959  0.3096     32     no  Study_ID 
sigma^2.2  0.2159  0.4647    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 4119.2520, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 0.0088, p-val = 0.9254

Model Results:

              estimate      se     tval   df    pval    ci.lb   ci.ub    
intrcpt         0.2428  0.1380   1.7601  261  0.0796  -0.0288  0.5145  . 
sqrt_inv_e_n   -0.0363  0.3879  -0.0937  261  0.9254  -0.8001  0.7274    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Visualise the Egger-type regression: effect size against the
# square root of the inverse effective sample size.
bubble_plot(
  bias_model,
  mod = "sqrt_inv_e_n",
  group = "Study_ID",
  xlab = "Square root of inverse of effective sample size"
)

Figure 10— Egger’s test of publication bias
Code
# Decline-effect ("time-lag bias") test: publication year as moderator.
year_model <- rma.mv(
  yi = lnRR,
  V = VCV,
  mods = ~ 1 + Year,
  random = list(~ 1 | Study_ID, ~ 1 | Obs_ID),
  data = df_bias,
  method = "REML",
  test = "t",
  sparse = TRUE
)

summary(year_model)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.3645   494.7289   502.7289   516.9870   502.8852   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0520  0.2281     32     no  Study_ID 
sigma^2.2  0.2428  0.4927    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 6249.4316, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 0.0057, p-val = 0.9399

Model Results:

         estimate       se     tval   df    pval     ci.lb    ci.ub    
intrcpt    1.1904  12.9935   0.0916  261  0.9271  -24.3950  26.7758    
Year      -0.0005   0.0065  -0.0755  261  0.9399   -0.0132   0.0122    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Visualise the decline-effect regression: effect size over publication year.
bubble_plot(
  year_model,
  mod = "Year",
  group = "Study_ID",
  xlab = "Year of publication"
)

Figure 11— Decline effect

8 Additional analyses

We compared effect size between two datasets (predator and prey) to see whether there is any difference in effect size between two datasets.

Code
# Compare effect sizes between the predator and prey datasets.
mr_dataset <- rma.mv(
  yi = lnRR,
  V = VCV,
  mods = ~ Dataset,
  random = list(~ 1 | Study_ID, ~ 1 | Obs_ID),
  data = dat,
  method = "REML",
  test = "t",
  sparse = TRUE
)

summary(mr_dataset)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.0730   494.1459   502.1459   516.4040   502.3022   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0522  0.2285     32     no  Study_ID 
sigma^2.2  0.2420  0.4919    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 6222.4453, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 0.5964, p-val = 0.4406

Model Results:

             estimate      se    tval   df    pval    ci.lb   ci.ub    
intrcpt        0.1598  0.0880  1.8162  261  0.0705  -0.0135  0.3331  . 
Datasetprey    0.0919  0.1191  0.7723  261  0.4406  -0.1425  0.3264    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Orchard plot contrasting the predator and prey datasets.
orchard_plot(
  mr_dataset,
  mod = "Dataset",
  group = "Study_ID",
  xlab = "Dataset",
  angle = 45
)

Figure 12— Effect size and dataset - predator and prey
Code
# Meta-regression with log-transformed background area as moderator.
mr_background <- rma.mv(
  yi = lnRR,
  V = VCV,
  mods = ~ Log_background,
  random = list(~ 1 | Study_ID, ~ 1 | Obs_ID),
  data = dat,
  method = "REML",
  test = "t",
  sparse = TRUE
)

summary(mr_background)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.0585   494.1170   502.1170   516.3751   502.2732   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0556  0.2358     32     no  Study_ID 
sigma^2.2  0.2421  0.4920    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 6234.1356, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 0.2144, p-val = 0.6437

Model Results:

                estimate      se     tval   df    pval    ci.lb   ci.ub    
intrcpt           0.4058  0.4286   0.9468  261  0.3446  -0.4382  1.2498    
Log_background   -0.0282  0.0609  -0.4631  261  0.6437  -0.1480  0.0917    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Bubble plot of the background-area slope, grouped by study.
# FIX: corrected the "backgroud" typo in the axis label.
bubble_plot(mr_background,
            mod = "Log_background",
            group = "Study_ID",
            xlab = "Log-transformed background")

Figure 13— Effect size and background area

9 Figure gallery

:::{.panel-tabset}

9.1 Discrete moderators

Figure - discrete moderators

9.2 Continuous moderators

Figure - continuous moderators

9.3 Publication bias

Figure - publication bias

10 R session information

R version 4.3.1 (2023-06-16)

Platform: x86_64-apple-darwin20 (64-bit)

locale: en_US.UTF-8||en_US.UTF-8||en_US.UTF-8||C||en_US.UTF-8||en_US.UTF-8

attached base packages: parallel, stats, graphics, grDevices, utils, datasets, methods and base

other attached packages: uncoveR(v.0.2.0), orchaRd(v.2.0), miWQS(v.0.4.4), metafor(v.4.2-0), numDeriv(v.2016.8-1.1), metadat(v.1.2-0), Matrix(v.1.6-1), patchwork(v.1.1.3), lubridate(v.1.9.2), forcats(v.1.0.0), stringr(v.1.5.0), dplyr(v.1.1.3), purrr(v.1.0.2), readr(v.2.1.4), tidyr(v.1.3.0), tibble(v.3.2.1), tidyverse(v.2.0.0), knitr(v.1.43), here(v.1.0.1), ggcorrplot(v.0.1.4), ggplot2(v.3.4.3), ggokabeito(v.0.1.0), DT(v.0.28) and ape(v.5.7-1)

loaded via a namespace (and not attached): gridExtra(v.2.3), glm2(v.1.2.1), sandwich(v.3.0-2), rlang(v.1.1.1), magrittr(v.2.0.3), compiler(v.4.3.1), mgcv(v.1.8-42), reshape2(v.1.4.4), vctrs(v.0.6.3), quantreg(v.5.97), pkgconfig(v.2.0.3), fastmap(v.1.1.1), backports(v.1.4.1), mcmc(v.0.9-7), ellipsis(v.0.3.2), labeling(v.0.4.3), pander(v.0.6.5), utf8(v.1.2.3), rmarkdown(v.2.24), tzdb(v.0.4.0), ggbeeswarm(v.0.7.2), MatrixModels(v.0.5-2), xfun(v.0.40), cachem(v.1.0.8), jsonlite(v.1.8.7), gmm(v.1.8), cluster(v.2.1.4), R6(v.2.5.1), bslib(v.0.5.1), stringi(v.1.7.12), Rsolnp(v.1.16), rlist(v.0.4.6.2), rpart(v.4.1.19), jquerylib(v.0.1.4), estimability(v.1.4.1), Rcpp(v.1.0.11), zoo(v.1.8-12), tmvmixnorm(v.1.1.1), base64enc(v.0.1-3), pacman(v.0.5.1), splines(v.4.3.1), nnet(v.7.3-19), timechange(v.0.2.0), tidyselect(v.1.2.0), rstudioapi(v.0.15.0), yaml(v.2.3.7), plyr(v.1.8.8), lattice(v.0.21-8), withr(v.2.5.0), coda(v.0.19-4), evaluate(v.0.21), tmvtnorm(v.1.5), foreign(v.0.8-84), survival(v.3.5-5), pillar(v.1.9.0), checkmate(v.2.2.0), stats4(v.4.3.1), generics(v.0.1.3), rprojroot(v.2.0.3), invgamma(v.1.1), mathjaxr(v.1.6-0), truncnorm(v.1.0-9), hms(v.1.1.3), munsell(v.0.5.0), scales(v.1.2.1), xtable(v.1.8-4), glue(v.1.6.2), emmeans(v.1.8.8), Hmisc(v.5.1-0), tools(v.4.3.1), data.table(v.1.14.8), SparseM(v.1.81), matrixNormal(v.0.1.1), mvtnorm(v.1.2-3), grid(v.4.3.1), MCMCpack(v.1.6-3), crosstalk(v.1.2.0), colorspace(v.2.1-0), nlme(v.3.1-162), beeswarm(v.0.4.0), condMVNorm(v.2020.1), vipor(v.0.4.5), htmlTable(v.2.4.1), latex2exp(v.0.9.6), Formula(v.1.2-5), cli(v.3.6.1), fansi(v.1.0.4), gtable(v.0.3.4), sass(v.0.4.7), digest(v.0.6.33), farver(v.2.1.1), htmlwidgets(v.1.6.2), htmltools(v.0.5.6), lifecycle(v.1.0.3) and MASS(v.7.3-60)